Another potentially interesting question we can try to answer is how much face representation we see across the task. In order to do so, we’ve trained a linear SVM classifier within subjects on the data from the smoothed FFA localizer to classify signal into faces, objects and scrambles. We can then apply that classifier to various facets of our data. For each of these analyses, we will look at the probability of the classifier predicting a face. If the classifier does indeed predict a face, we score that TR with a “1”, otherwise, it gets a “0”, meaning chance becomes 1/3 = .33.
First, we will apply it to each TR of individual trials. Trials are split into 4 bins based on accuracy and load, and averaged over those measures to create a single time course for each category. The classifier was also applied to each TR of a “template” for each condition. In this analysis, all trials in a given condition were averaged to create a prototypical example for the category. The classifier was then applied to those averages.
We can then look at the probability of classification across subjects. First, we look at it across all subjects, but then we can look at it across our working memory capacity groups.
Finally, we will relate these neural measures to behavior, both averaged over time and for each TR.
library(reshape2)
library(tidyverse)
## ── Attaching packages ──────────────────────────────────────────────────────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.2 ✓ purrr 0.3.4
## ✓ tibble 3.0.3 ✓ dplyr 1.0.1
## ✓ tidyr 1.1.1 ✓ stringr 1.4.0
## ✓ readr 1.3.1 ✓ forcats 0.5.0
## ── Conflicts ─────────────────────────────────────────────────────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
library(patchwork)
# Load behavioral data and precomputed WM-capacity group splits,
# plus helper functions used throughout this analysis.
load('data/behav.RData')
load('data/split_groups_info.RData')
load('data/DFR_split_groups_info.RData')
source("helper_fxns/split_into_groups.R")
source('helper_fxns/prep_trial_levels_for_plot.R')
source("helper_fxns/split_trial_type.R")
# Standard error of the mean, ignoring missing values:
# sd of the non-NA observations divided by sqrt of their count.
se <- function(x) {
  n_obs <- sum(!is.na(x))
  sd(x, na.rm = TRUE) / sqrt(n_obs)
}
# Classifier information: per-subject cross-validation accuracy and the
# tuned SVM cost parameter selected for each subject.
clf_acc <- read.csv('data/MVPA/DFR_unsmoothed/clf_acc.csv', header=FALSE)
best_c <- read.csv('data/MVPA/DFR_unsmoothed/best_C.csv', header=FALSE)
# Per-subject trial counts in each load x accuracy bin
trial_acc <- read.csv("data/MVPA/DFR_unsmoothed/all_suj_acc.csv", header = FALSE)
colnames(trial_acc) <- c('low correct', 'low incorrect', 'high correct', 'high incorrect')
# Classifier outputs applied to condition "templates" (trial-averaged data);
# one row per subject, columns V1-V14 are the 14 TRs of the trial.
averages_from_template <- list(high_correct = read.csv('data/MVPA/DFR_unsmoothed/all_suj_high_correct_avg.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/DFR_unsmoothed/all_suj_high_incorrect_avg.csv',header=FALSE),
low_correct = read.csv('data/MVPA/DFR_unsmoothed/all_suj_low_correct_avg.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/DFR_unsmoothed/all_suj_low_incorrect_avg.csv',header=FALSE))
# only want to look at subjects who have at least 4 low load incorrect trials for those analyses
min_low_incorrect <- which(trial_acc$`low incorrect` < 4)
# Blank out those subjects' low-load-incorrect time courses (all 14 TRs)
averages_from_template[["low_incorrect"]][min_low_incorrect, 1:14] <- NA
# Correct-minus-incorrect difference time courses, computed per load
averages_from_template[["high_load_correct_diff"]] <- averages_from_template[["high_correct"]][,1:14] - averages_from_template[["high_incorrect"]][,1:14]
averages_from_template[["low_load_correct_diff"]] <- averages_from_template[["low_correct"]][,1:14] - averages_from_template[["low_incorrect"]][,1:14]
# Face probabilities from individual trials, averaged within condition;
# one row per subject, columns V1-V14 are the 14 TRs.
individual_trial_averages_probs <- list(
high_correct = read.csv('data/MVPA/DFR_unsmoothed/all_suj_high_correct_indiv_avg_probs.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/DFR_unsmoothed/all_suj_high_incorrect_indiv_avg_probs.csv',header=FALSE),
low_correct = read.csv('data/MVPA/DFR_unsmoothed/all_suj_low_correct_indiv_avg_probs.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/DFR_unsmoothed/all_suj_low_incorrect_indiv_avg_probs.csv',header=FALSE))
# Apply the same low-load-incorrect subject exclusion as for the templates
individual_trial_averages_probs[["low_incorrect"]][min_low_incorrect, 1:14] <- NA
# Correct-minus-incorrect difference time courses, computed per load
individual_trial_averages_probs[["high_load_correct_diff"]] <- individual_trial_averages_probs[["high_correct"]][,1:14] - individual_trial_averages_probs[["high_incorrect"]][,1:14]
individual_trial_averages_probs[["low_load_correct_diff"]] <- individual_trial_averages_probs[["low_correct"]][,1:14] - individual_trial_averages_probs[["low_incorrect"]][,1:14]
# Binary face predictions (1 = face) from individual trials, averaged within
# condition — the original comment said "averages" but these are the _preds files.
individual_trial_averages_preds <- list(
high_correct = read.csv('data/MVPA/DFR_unsmoothed/all_suj_high_correct_indiv_avg_preds.csv',header=FALSE),
high_incorrect = read.csv('data/MVPA/DFR_unsmoothed/all_suj_high_incorrect_indiv_avg_preds.csv',header=FALSE),
low_correct = read.csv('data/MVPA/DFR_unsmoothed/all_suj_low_correct_indiv_avg_preds.csv',header=FALSE),
low_incorrect = read.csv('data/MVPA/DFR_unsmoothed/all_suj_low_incorrect_indiv_avg_preds.csv',header=FALSE))
# Apply the same low-load-incorrect subject exclusion as above
individual_trial_averages_preds[["low_incorrect"]][min_low_incorrect, 1:14] <- NA
# Correct-minus-incorrect difference time courses, computed per load
individual_trial_averages_preds[["high_load_correct_diff"]] <- individual_trial_averages_preds[["high_correct"]][,1:14] - individual_trial_averages_preds[["high_incorrect"]][,1:14]
individual_trial_averages_preds[["low_load_correct_diff"]] <- individual_trial_averages_preds[["low_correct"]][,1:14] - individual_trial_averages_preds[["low_incorrect"]][,1:14]
# replace NaNs with NA, add in PTID
# The 6 list elements are the 4 conditions plus the 2 difference scores;
# each has 14 TR columns. NaN appears where an average was taken over zero
# trials; converting to NA lets downstream na.rm/missingness handling work.
for (i in seq.int(1,6)){
for (ii in seq.int(1,14)){
averages_from_template[[i]][is.nan(averages_from_template[[i]][,ii]),ii] <- NA
individual_trial_averages_probs[[i]][is.nan(individual_trial_averages_probs[[i]][,ii]),ii] <- NA
individual_trial_averages_preds[[i]][is.nan(individual_trial_averages_preds[[i]][,ii]),ii] <- NA
}
# Attach participant IDs so these can be merged with behavioral data later
averages_from_template[[i]]$PTID <- constructs_fMRI$PTID
individual_trial_averages_probs[[i]]$PTID <- constructs_fMRI$PTID
individual_trial_averages_preds[[i]]$PTID <- constructs_fMRI$PTID
}
# Cache the cleaned classifier outputs for reuse by other documents
save(list=c("clf_acc", "best_c", "averages_from_template", "individual_trial_averages_probs","individual_trial_averages_preds"), file = "data/MVPA_DFR_delay_mask.RData")
On average, we were able to classify faces with 63.2% accuracy (statistically significantly different from chance = 0.33). The classifier was trained on data from an independent FFA localizer. Data were masked with a mask derived from the high > low load contrast during the DFR task - that is, regions that are sensitive to load during the delay period. From that mask, the top 100 voxels based on the faces vs objects contrast in the overall subject GLM were selected as features for the classifier. The data used to train the classifier were shifted by 4.5 seconds to account for the hemodynamic delay.
A linear SVM classifier was used; the localizer task was split into 6 blocks of stimuli. These blocks were used in a pre-defined split for cross validation, where one block of each stimulus type was held out as a test set. Data were normalized within the training and test sets separately. Within this training set, another cross validation process was repeated to tune the cost of the model over the values [0.01, 0.1, 1, 10]. The best value of the cost function was used for each cross validation to score the classifier on the test set. The best classifier was also used to predict face presence at each TR during the DFR task.
# Average cross-validation accuracy per subject, tested against chance (1/3)
clf_acc$average <- rowMeans(clf_acc)
t.test(clf_acc$average,mu=0.33)
##
## One Sample t-test
##
## data: clf_acc$average
## t = 31.178, df = 169, p-value < 2.2e-16
## alternative hypothesis: true mean is not equal to 0.33
## 95 percent confidence interval:
## 0.6142313 0.6526582
## sample estimates:
## mean of x
## 0.6334447
# Distribution of per-subject classifier accuracy; dotted line marks chance
ggplot(data = clf_acc, aes(x = average))+
geom_histogram()+
geom_vline(aes(xintercept=0.33), linetype="dotted")+
theme_classic()+
xlab("Average classifier accuracy")
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Reshape each list of condition time courses into long format for ggplot
# (helper adds TR, condition level, and SE ribbon bounds per time point)
template_preds_melt <- prep_trial_levels_for_plot(averages_from_template)
## Using level as id variables
individual_trial_probs_melt <- prep_trial_levels_for_plot(individual_trial_averages_probs)
## Using level as id variables
individual_trial_preds_melt <- prep_trial_levels_for_plot(individual_trial_averages_preds)
## Using level as id variables
The shape of the time course is different here than it was for the fusiform region - here, we’re well below chance for encoding, but start to see a significant probability during delay (starting around TR 8) and the probe.
During delay period, there is no difference in probability of classifying face across load, but we do see significantly higher probability of classifying a face in correct vs incorrect trials.
# Time course of face probability for the 4 load x accuracy conditions
# (individual-trial averages); dotted line marks chance (0.33)
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Probability of classifier predicting a face")+
theme_classic()
# Load effect at the delay time point (V8 = TR 8): average over accuracy
# within each load, then compare high vs low load within subjects
delay_level_avg <- data.frame(high = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V8, individual_trial_averages_probs[["high_incorrect"]]$V8), na.rm=TRUE), low = rowMeans(cbind(individual_trial_averages_probs[["low_correct"]]$V8, individual_trial_averages_probs[["low_incorrect"]]$V8),na.rm=TRUE))
t.test(delay_level_avg$high,delay_level_avg$low,paired=TRUE)
##
## Paired t-test
##
## data: delay_level_avg$high and delay_level_avg$low
## t = -3.2373, df = 169, p-value = 0.001452
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.05307574 -0.01286545
## sample estimates:
## mean of the differences
## -0.03297059
# Accuracy effect at the delay time point (V8 = TR 8): average over load
# within each accuracy bin, then compare correct vs incorrect within subjects
delay_acc_avg <- data.frame(correct = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V8, individual_trial_averages_probs[["low_correct"]]$V8), na.rm=TRUE), incorrect = rowMeans(cbind(individual_trial_averages_probs[["low_incorrect"]]$V8, individual_trial_averages_probs[["high_incorrect"]]$V8),na.rm=TRUE))
t.test(delay_acc_avg$correct,delay_acc_avg$incorrect,paired=TRUE)
##
## Paired t-test
##
## data: delay_acc_avg$correct and delay_acc_avg$incorrect
## t = 3.5569, df = 169, p-value = 0.0004871
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## 0.01821507 0.06365132
## sample estimates:
## mean of the differences
## 0.04093319
In this mask, we see pretty much always a larger probability of classifying a face from correct trials.
# Correct-minus-incorrect difference time courses per load
# (individual-trial averages); dotted line marks zero difference
ggplot(data=individual_trial_probs_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
geom_line(aes(x=TR,y=0), linetype="dotted")+
geom_line(aes(color=level))+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
theme_classic()
In the templates, we see a similar structure as in the individual trials. However, instead of only seeing differences in load (like we saw in the fusiform data), we see a difference in accuracy, where there is higher probability of being able to classify a face from delay and probe periods in correct trials (regardless of load) than incorrect trials. We also see a difference in load during delay but not probe.
# Time course of face probability for the 4 conditions, from the trial
# templates; dotted line marks chance (0.33)
ggplot(data=template_preds_melt%>% filter(level %in% c("high_correct", "high_incorrect", "low_correct", "low_incorrect")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Probability of classifier predicting a face")+
theme_classic()
# Template accuracy effect at delay (V8 = TR 8): correct vs incorrect,
# averaged over load, compared within subjects
acc_data_delay <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V8,averages_from_template[["low_correct"]]$V8)), incorrect = rowMeans(cbind(averages_from_template[["high_incorrect"]]$V8, averages_from_template[["low_incorrect"]]$V8)))
t.test(acc_data_delay$correct,acc_data_delay$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: acc_data_delay$correct and acc_data_delay$incorrect
## t = 2.4421, df = 37, p-value = 0.0195
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## 0.02502531 0.26883434
## sample estimates:
## mean of the differences
## 0.1469298
# Template accuracy effect at probe (V10 = TR 10): correct vs incorrect,
# averaged over load, compared within subjects
acc_data_probe <- data.frame(correct = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,averages_from_template[["low_correct"]]$V10)), incorrect = rowMeans(cbind(averages_from_template[["high_incorrect"]]$V10, averages_from_template[["low_incorrect"]]$V10)))
t.test(acc_data_probe$correct,acc_data_probe$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: acc_data_probe$correct and acc_data_probe$incorrect
## t = -0.10719, df = 37, p-value = 0.9152
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.1309449 0.1177870
## sample estimates:
## mean of the differences
## -0.006578947
# Template load effect at delay (V8 = TR 8): high vs low load, averaged over
# accuracy, compared within subjects.
# Fixed misleading column names: these hold high- vs low-load averages, not
# correct/incorrect bins as the original names implied.
load_data_delay <- data.frame(high = rowMeans(cbind(averages_from_template[["high_correct"]]$V8,averages_from_template[["high_incorrect"]]$V8)), low = rowMeans(cbind(averages_from_template[["low_correct"]]$V8, averages_from_template[["low_incorrect"]]$V8)))
t.test(load_data_delay$high,load_data_delay$low, paired=TRUE)
##
## Paired t-test
##
## data: load_data_delay$correct and load_data_delay$incorrect
## t = 0.27702, df = 37, p-value = 0.7833
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.09692873 0.12763049
## sample estimates:
## mean of the differences
## 0.01535088
# Template load effect at probe (V10 = TR 10): high vs low load, averaged over
# accuracy, compared within subjects.
# Fixed misleading column names: these hold high- vs low-load averages, not
# correct/incorrect bins as the original names implied.
load_data_probe <- data.frame(high = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,averages_from_template[["high_incorrect"]]$V10)), low = rowMeans(cbind(averages_from_template[["low_correct"]]$V10, averages_from_template[["low_incorrect"]]$V10)))
t.test(load_data_probe$high,load_data_probe$low, paired=TRUE)
##
## Paired t-test
##
## data: load_data_probe$correct and load_data_probe$incorrect
## t = -0.34989, df = 37, p-value = 0.7284
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## -0.10424598 0.07354423
## sample estimates:
## mean of the differences
## -0.01535088
# One-way ANOVA across the 4 conditions at probe (V10 = TR 10) in the templates
probe_data_template <- data.frame(high_correct=averages_from_template[["high_correct"]]$V10,
high_incorrect = averages_from_template[["high_incorrect"]]$V10,
low_correct = averages_from_template[["low_correct"]]$V10,
low_incorrect = averages_from_template[["low_incorrect"]]$V10)
# Wide -> long: one row per subject x condition
probe_data_template <- melt(probe_data_template)
## No id variables; using all as measure variables
probe.aov <- aov(value ~ variable, data = probe_data_template)
summary(probe.aov)
## Df Sum Sq Mean Sq F value Pr(>F)
## variable 3 1.34 0.4469 2.835 0.0376 *
## Residuals 544 85.74 0.1576
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 132 observations deleted due to missingness
# Post-hoc pairwise comparisons of condition means at probe
TukeyHSD(probe.aov)
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = value ~ variable, data = probe_data_template)
##
## $variable
## diff lwr upr p adj
## high_incorrect-high_correct -0.10490196 -0.21586743 0.006063505 0.0716155
## low_correct-high_correct -0.02254902 -0.13351449 0.088416447 0.9533703
## low_incorrect-high_correct -0.13519092 -0.31876557 0.048383734 0.2301619
## low_correct-high_incorrect 0.08235294 -0.02861253 0.193318407 0.2239269
## low_incorrect-high_incorrect -0.03028896 -0.21386361 0.153285694 0.9741612
## low_incorrect-low_correct -0.11264190 -0.29621655 0.070932753 0.3901911
I’m not 100% sure this is statistically valid, but the average prediction is higher for the predictions from template vs predictions from the individual trials.
# Compare probe-period (V10) predictions from templates vs individual trials.
# NOTE(review): low_incorrect is left out of both averages — presumably because
# of the NA-heavy low-incorrect exclusions above; confirm that is intentional.
compare_across_temp_indiv <- data.frame(template = rowMeans(cbind(averages_from_template[["high_correct"]]$V10,
averages_from_template[["high_incorrect"]]$V10,
averages_from_template[["low_correct"]]$V10)),
indiv = rowMeans(cbind(individual_trial_averages_probs[["high_correct"]]$V10,
individual_trial_averages_probs[["high_incorrect"]]$V10,
individual_trial_averages_probs[["low_correct"]]$V10)))
# Non-parametric paired comparison (distribution-free)
wilcox.test(compare_across_temp_indiv$template, compare_across_temp_indiv$indiv,paired=TRUE)
##
## Wilcoxon signed rank test with continuity correction
##
## data: compare_across_temp_indiv$template and compare_across_temp_indiv$indiv
## V = 11661, p-value = 8.175e-12
## alternative hypothesis: true location shift is not equal to 0
There is a difference between correct and incorrect trials at probe, which we saw in above data.
# Correct-minus-incorrect difference time courses per load, from templates;
# dotted line marks zero difference
ggplot(data=template_preds_melt %>% filter(level %in% c("low_load_correct_diff","high_load_correct_diff")),aes(x=TR,y=value))+
geom_line(aes(color=level))+
geom_line(aes(x=TR,y=0),color="black",linetype="dotted")+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=level),alpha=0.2)+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ylab("Correct - Incorrect Diff in Probability of Classifying Faces")+
theme_classic()
# High-load correct vs incorrect late in the probe period.
# NOTE(review): uses V11 (TR 11) while the other probe analyses use V10 —
# confirm this time point is intended.
probe_correct_diff_high <- data.frame(correct=averages_from_template[["high_correct"]]$V11, incorrect=averages_from_template[["high_incorrect"]]$V11)
t.test(probe_correct_diff_high$correct, probe_correct_diff_high$incorrect, paired=TRUE)
##
## Paired t-test
##
## data: probe_correct_diff_high$correct and probe_correct_diff_high$incorrect
## t = 2.2755, df = 169, p-value = 0.02413
## alternative hypothesis: true difference in means is not equal to 0
## 95 percent confidence interval:
## 0.009740628 0.137318195
## sample estimates:
## mean of the differences
## 0.07352941
# Split each condition's time courses by working memory capacity group
split_template <- split_trial_type(averages_from_template,WM_groups)
split_indiv_probs <- split_trial_type(individual_trial_averages_probs, WM_groups)
There are no differences between working memory capacity groups.
# One time-course panel per condition, colored by WM capacity group
# (individual-trial data); dotted line marks chance (0.33)
indiv_avgs <- list()
for (i in seq.int(1,4)){
indiv_avgs[[i]] <- ggplot(data = split_indiv_probs[["avgs"]][[i]][["all"]])+
geom_line(aes(x=TR,y=mean,color=group))+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=group),alpha=0.2)+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(split_indiv_probs[["avgs"]])[i])+
ylab("Probability")+
theme_classic()
}
# Arrange the 4 panels in a 2x2 grid with a shared legend (patchwork)
(indiv_avgs[[1]] + indiv_avgs[[2]]) / (indiv_avgs[[3]] + indiv_avgs[[4]])+
plot_layout(guides = "collect")+
plot_annotation(title="Probability of classifier predicting a face from individual trials")
# Group effect at delay (column 8 = TR 8) for each condition.
# Column 16 is the grouping variable — presumably the WM capacity group
# label added by split_trial_type; verify against the helper.
for (trial_type in seq.int(1,4)){
print(names(split_indiv_probs[["all_data"]])[trial_type])
temp.aov <- aov(split_indiv_probs[["all_data"]][[trial_type]][["all"]][,8] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][,16])
print(summary(temp.aov))
print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0491 0.02457
## Residuals 165 2.6500 0.01606
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 1.53 0.22
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 8] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.035168780 -0.02147435 0.09181191 0.3087737
## low-high -0.002124985 -0.05876812 0.05451815 0.9956694
## low-med -0.037293765 -0.09393690 0.01934937 0.2672441
##
## [1] "high_incorrect"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.109 0.05430
## Residuals 165 4.723 0.02863
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 1.897 0.153
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 8] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.050653735 -0.02496756 0.12627503 0.2553231
## low-high -0.006051985 -0.08167328 0.06956931 0.9804460
## low-med -0.056705720 -0.13232701 0.01891557 0.1817138
##
## [1] "low_correct"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0257 0.01285
## Residuals 165 2.1935 0.01329
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 0.966 0.383
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 8] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high -0.006304454 -0.05783846 0.04522955 0.9549160
## low-high -0.028809776 -0.08034378 0.02272423 0.3849016
## low-med -0.022505322 -0.07403933 0.02902869 0.5572494
##
## [1] "low_incorrect"
## Df Sum Sq Mean Sq
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 2 0.0965 0.04827
## Residuals 35 1.0821 0.03092
## F value Pr(>F)
## split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16] 1.561 0.224
## Residuals
## 130 observations deleted due to missingness
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 8] ~ split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_indiv_probs[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.12985209 -0.05990082 0.3196050 0.2290346
## low-high 0.10655151 -0.07083951 0.2839425 0.3173879
## low-med -0.02330058 -0.18554681 0.1389456 0.9343059
Similarly, no differences between groups at delay or probe in the template.
# One time-course panel per condition, colored by WM capacity group
# (template data); dotted line marks chance (0.33)
template_avgs <- list()
for (i in seq.int(1,4)){
template_avgs[[i]] <- ggplot(data = split_template[["avgs"]][[i]][["all"]])+
geom_line(aes(x=TR,y=mean,color=group))+
geom_ribbon(aes(x=TR,ymin=se_min,ymax=se_max,fill=group),alpha=0.2)+
geom_line(aes(x=TR,y=0.33),color="black",linetype="dotted")+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(split_template[["avgs"]])[i])+
ylab("Probability")+
theme_classic()
}
# Arrange the 4 panels in a 2x2 grid with a shared legend (patchwork)
(template_avgs[[1]] + template_avgs[[2]]) / (template_avgs[[3]] + template_avgs[[4]])+
plot_layout(guides = "collect")+
plot_annotation(title="Probability of classifier predicting a face from trial templates")
# Group effect at probe (column 10 = TR 10) for each template condition.
# Column 16 is the grouping variable — presumably the WM capacity group
# label added by split_trial_type; verify against the helper.
for (trial_type in seq.int(1,4)){
print(names(split_template[["all_data"]])[trial_type])
temp.aov <- aov(split_template[["all_data"]][[trial_type]][["all"]][,10] ~ split_template[["all_data"]][[trial_type]][["all"]][,16])
print(summary(temp.aov))
print(TukeyHSD(temp.aov))
}
## [1] "high_correct"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.043 0.02133
## Residuals 165 26.142 0.15844
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 0.135 0.874
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 10] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.01488095 -0.1630243 0.1927862 0.9786593
## low-high -0.02380952 -0.2017148 0.1540957 0.9462910
## low-med -0.03869048 -0.2165957 0.1392148 0.8644643
##
## [1] "high_incorrect"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.195 0.09739
## Residuals 165 27.646 0.16755
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 0.581 0.56
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 10] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high -0.08333333 -0.2662862 0.09961952 0.5295098
## low-high -0.04464286 -0.2275957 0.13831000 0.8325301
## low-med 0.03869048 -0.1442624 0.22164333 0.8713337
##
## [1] "low_correct"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.572 0.2859
## Residuals 165 24.734 0.1499
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 1.907 0.152
## Residuals
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 10] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high -0.13988095 -0.31292808 0.03316618 0.1385534
## low-high -0.04464286 -0.21768998 0.12840427 0.8147887
## low-med 0.09523810 -0.07780903 0.26828522 0.3962945
##
## [1] "low_incorrect"
## Df Sum Sq Mean Sq
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 2 0.319 0.1596
## Residuals 35 5.280 0.1509
## F value Pr(>F)
## split_template[["all_data"]][[trial_type]][["all"]][, 16] 1.058 0.358
## Residuals
## 130 observations deleted due to missingness
## Tukey multiple comparisons of means
## 95% family-wise confidence level
##
## Fit: aov(formula = split_template[["all_data"]][[trial_type]][["all"]][, 10] ~ split_template[["all_data"]][[trial_type]][["all"]][, 16])
##
## $`split_template[["all_data"]][[trial_type]][["all"]][, 16]`
## diff lwr upr p adj
## med-high 0.0462963 -0.3728535 0.4654461 0.9605675
## low-high -0.1546841 -0.5465274 0.2371592 0.6028130
## low-med -0.2009804 -0.5593700 0.1574092 0.3660521
We see that there is a significant negative correlation between classification averaged over time and accuracy for the high load correct trials, and a significant positive correlation between BPRS and the difference between correct and incorrect trials at high load.
# Per-subject face probability averaged over all 14 TRs, per condition.
# Only low_incorrect uses na.rm=TRUE, since its excluded subjects are all-NA;
# those rows come back as NaN from rowMeans.
indiv_avg_over_time <- data.frame(high_correct = rowMeans(individual_trial_averages_probs[["high_correct"]][,1:14]),
high_incorrect = rowMeans(individual_trial_averages_probs[["high_incorrect"]][,1:14]),
low_correct = rowMeans(individual_trial_averages_probs[["low_correct"]][,1:14]),
low_incorrect = rowMeans(individual_trial_averages_probs[["low_incorrect"]][,1:14],na.rm=TRUE),
high_load_diff_correct = rowMeans(individual_trial_averages_probs[["high_load_correct_diff"]][,1:14]),
low_load_diff_correct = rowMeans(individual_trial_averages_probs[["low_load_correct_diff"]][,1:14]))
# Not a no-op: is.na() is also TRUE for NaN, so this converts NaN cells to NA
indiv_avg_over_time[is.na(indiv_avg_over_time)] <- NA
# Attach WM span and participant ID, then merge in symptom (BPRS) and DFR
# accuracy measures by participant
indiv_avg_over_time <- data.frame(indiv_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI, PTID = constructs_fMRI$PTID)
indiv_avg_over_time <- merge(indiv_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")
# Scatter + linear fit of each condition's time-averaged face probability
# against three behavioral measures. After the merge, column 1 is PTID and
# columns 2-7 are the 6 probability measures, so i+1 selects measure i;
# columns 8-11 hold the behavioral variables.
plot_list <- list()
for (i in seq.int(1,6)){
plot_data <- indiv_avg_over_time[,c(i+1,8:11)]
# Rename the selected probability column so one aes() spec works for all i
colnames(plot_data)[1] <- "prob"
plot_list[["omnibus"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=omnibus_span))+
geom_point()+
stat_smooth(method="lm")+
xlab("Omnibus Span")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
plot_list[["DFR_Acc"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=XDFR_MRI_ACC_L3))+
geom_point()+
stat_smooth(method="lm")+
xlab("High Load Acc")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
plot_list[["BPRS"]][[i]] <- ggplot(data = plot_data,aes(y=prob,x=BPRS_TOT))+
geom_point()+
stat_smooth(method="lm")+
xlab("BPRS")+
ggtitle(colnames(indiv_avg_over_time)[i+1])+
theme_classic()
}
# 2x2 grid: face probability vs omnibus span for the 4 conditions
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
(plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) +
plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Correct-incorrect difference measures vs omnibus span
(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# 2x2 grid: face probability vs high-load DFR accuracy for the 4 conditions
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
(plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) +
plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Correct-incorrect difference measures vs high-load DFR accuracy
(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# 2x2 grid: face probability vs BPRS symptom total for the 4 conditions
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
(plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) +
plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Correct-incorrect difference measures vs BPRS symptom total
(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# High-load correct-incorrect difference vs WM span (follow-up significance test)
cor.test(indiv_avg_over_time$high_load_diff_correct, indiv_avg_over_time$omnibus_span)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$high_load_diff_correct and indiv_avg_over_time$omnibus_span
## t = -1.1524, df = 168, p-value = 0.2508
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.23592699 0.06279379
## sample estimates:
## cor
## -0.08855745
# High-load-correct face probability vs high-load task accuracy
cor.test(indiv_avg_over_time$high_correct, indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$high_correct and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = -2.1785, df = 168, p-value = 0.03076
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.30856648 -0.01562585
## sample estimates:
## cor
## -0.1657503
# Low-load correct-incorrect difference vs high-load task accuracy
cor.test(indiv_avg_over_time$low_load_diff_correct, indiv_avg_over_time$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$low_load_diff_correct and indiv_avg_over_time$XDFR_MRI_ACC_L3
## t = 0.99838, df = 36, p-value = 0.3248
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1641581 0.4597008
## sample estimates:
## cor
## 0.1641391
# High-load correct-incorrect difference vs BPRS symptom total
cor.test(indiv_avg_over_time$high_load_diff_correct, indiv_avg_over_time$BPRS_TOT)
##
## Pearson's product-moment correlation
##
## data: indiv_avg_over_time$high_load_diff_correct and indiv_avg_over_time$BPRS_TOT
## t = 2.4136, df = 168, p-value = 0.01687
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.03347124 0.32463268
## sample estimates:
## cor
## 0.1830632
If we look at the patterns over time, we can see that BPRS tends to be positively related to classification only in the probe period during the high load trials, but starts to peak earlier in the low load trials. The strongest correlation with accuracy occurs during the encoding period. We also see a slight negative correlation with omnibus span during the probe period, particularly in the correct high load trials.
Next step is to pull out some of these correlations and see if they’re significant.
# Merge behavioral constructs with demographic/clinical data, keeping only
# the columns used in the correlation-by-TR analysis below.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")
# NOTE(review): positional column selection is fragile -- it assumes PTID,
# the span measure, the two DFR accuracy columns, and the clinical scores
# sit at exactly these positions in the merged frame; confirm if either
# input dataset changes shape.
data_to_plot <- data_to_plot[,c(1,7,14,15,41,42)]
# Load effect on accuracy: high load (L3) minus low load (L1).
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1
corr_to_behav_plots <- list()
for (i in seq_len(6)){
  trial_name <- names(individual_trial_averages_probs)[i]
  # 4 behavioral measures (columns 2:5 of data_to_plot) x 14 TRs.
  measure_by_time <- data.frame(matrix(nrow=4,ncol=14))
  for (measure in seq.int(2,5)){
    for (TR in seq_len(14)){
      # Correlate each behavioral measure with the face-classification
      # probability at each TR, across subjects, for this trial type.
      measure_by_time[measure-1,TR] <- cor(data_to_plot[,measure],
                                           individual_trial_averages_probs[[i]][,TR],
                                           use = "pairwise.complete.obs")
    }
  }
  # Transpose to TR-in-rows for plotting, and label measures by name.
  measure_by_time <- data.frame(t(measure_by_time))
  measure_by_time$TR <- seq_len(14)
  colnames(measure_by_time)[1:4] <- colnames(data_to_plot)[2:5]
  melted_measure_by_time <- melt(measure_by_time,id.vars="TR")
  corr_to_behav_plots[[trial_name]] <- ggplot(data=melted_measure_by_time,aes(x=TR,y=value))+
    geom_line(aes(color=variable))+
    scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
    ggtitle(trial_name)+
    theme_classic()
}
# Panels 1-4: the four accuracy-by-load trial types, in a 2x2 grid.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
# Panels 5-6: the correct-minus-incorrect difference scores.
(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between difference across correctness in face classification and behavioral measures")
# Build one scatter-plus-linear-fit panel per (behavioral measure, task
# period, trial type) combination. The original code repeated the same
# ggplot construction nine times per trial type; it is factored into a
# single documented helper here.
#
# plot_data:   merged subject-level data frame
# tr_col:      column name holding the probability at the TR of interest
# x_var:       column name of the behavioral measure on the x axis
# plot_title:  trial-type label for the panel title
scatter_prob_vs_behav <- function(plot_data, tr_col, x_var, plot_title) {
  ggplot(data = plot_data, aes(y = .data[[tr_col]], x = .data[[x_var]])) +
    geom_point() +
    stat_smooth(method = "lm") +
    ylab("Probability") +
    xlab(x_var) +  # keep the bare variable name as the x label
    ggtitle(plot_title) +
    theme_classic()
}

plot_list <- list()
for (trial_type in seq_len(6)) {
  temp_plot_data <- merge(p200_data, individual_trial_averages_probs[[trial_type]], by = "PTID")
  temp_plot_data <- merge(temp_plot_data, constructs_fMRI, by = "PTID")
  trial_name <- names(individual_trial_averages_probs)[trial_type]
  # V6 / V8 / V11 are the TR columns taken to index the cue, delay and
  # probe periods respectively -- TODO confirm against the task timing.
  periods <- c(cue = "V6", delay = "V8", probe = "V11")
  measures <- c(omnibus = "omnibus_span_no_DFR_MRI",
                L3_Acc  = "XDFR_MRI_ACC_L3",
                BPRS    = "BPRS_TOT")
  for (m in names(measures)) {
    for (p in names(periods)) {
      plot_list[[m]][[p]][[trial_type]] <-
        scatter_prob_vs_behav(temp_plot_data, periods[[p]], measures[[m]], trial_name)
    }
  }
}
We see significant negative relationships between omnibus span and classification on correct high load trials, negative relationships between high load accuracy and classification on high load trials (regardless of accuracy), and positive relationships between BPRS total and classification both on correct high load trials and for the difference between correct and incorrect trials at high load.
# 2x2 grid: omnibus span vs. classification at the cue TR for the four
# accuracy-by-load trial types.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /
(plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /
(plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /
(plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Cue-period (V6) classification on correct high-load trials vs. omnibus span.
# NOTE(review): this relies on row order of individual_trial_averages_probs
# matching temp_plot_data (last merge in the loop above) -- confirm alignment.
cor.test(individual_trial_averages_probs[["high_correct"]]$V6,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_correct"]]$V6 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -2.1552, df = 168, p-value = 0.03257
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.30696253 -0.01385447
## sample estimates:
## cor
## -0.1640267
cor.test(individual_trial_averages_probs[["high_correct"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_correct"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -3.1221, df = 168, p-value = 0.002115
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3715959 -0.0867219
## sample estimates:
## cor
## -0.2341794
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.3342, df = 168, p-value = 0.02077
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.31923147 -0.02745111
## sample estimates:
## cor
## -0.1772331
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V6 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 1.3525, df = 36, p-value = 0.1847
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1073350 0.5041379
## sample estimates:
## cor
## 0.2198936
# BPRS correlations exclude subjects with BPRS_TOT >= 70 (df drops from
# 168 to 167, so one subject) -- presumably an outlier exclusion; confirm.
cor.test(individual_trial_averages_probs[["high_correct"]]$V6[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_correct"]]$V6[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 2.294, df = 167, p-value = 0.02304
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.0244679 0.3173686
## sample estimates:
## cor
## 0.1747823
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V6[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V6[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 2.9414, df = 167, p-value = 0.003731
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.07343971 0.36081067
## sample estimates:
## cor
## 0.2219392
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V6[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V6[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 1.9384, df = 35, p-value = 0.06067
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.01406935 0.57715882
## sample estimates:
## cor
## 0.3113691
There are no significant relationships between individual differences and classification probability at delay.
# 2x2 grid: omnibus span vs. classification at the delay TR for the four
# accuracy-by-load trial types.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /
(plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /
(plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /
(plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -1.8432, df = 36, p-value = 0.07354
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.56070260 0.02872348
## sample estimates:
## cor
## -0.2936563
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 1.4454, df = 36, p-value = 0.157
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.09240678 0.51529876
## sample estimates:
## cor
## 0.2341949
cor.test(individual_trial_averages_probs[["low_correct"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_correct"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 1.0317, df = 168, p-value = 0.3037
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.07203102 0.22714506
## sample estimates:
## cor
## 0.07934345
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V8,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V8 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -0.46037, df = 36, p-value = 0.648
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.3867289 0.2492761
## sample estimates:
## cor
## -0.07650382
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V8[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V8[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 1.8327, df = 167, p-value = 0.06863
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.01077445 0.28532597
## sample estimates:
## cor
## 0.140414
cor.test(individual_trial_averages_probs[["high_incorrect"]]$V8[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_incorrect"]]$V8[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = -1.5037, df = 167, p-value = 0.1345
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.26197179 0.03600553
## sample estimates:
## cor
## -0.1155827
The only significant relationship here is between high load accuracy and the difference in classification between correct and incorrect trials at high load.
# 2x2 grid: omnibus span vs. classification at the probe TR for the four
# accuracy-by-load trial types.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /
(plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /
(plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /
(plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = 0.80216, df = 36, p-value = 0.4277
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.1954495 0.4338193
## sample estimates:
## cor
## 0.1325136
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -1.614, df = 168, p-value = 0.1084
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.26907807 0.02745656
## sample estimates:
## cor
## -0.1235683
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -1.1221, df = 36, p-value = 0.2692
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4755697 0.1443304
## sample estimates:
## cor
## -0.1838352
cor.test(individual_trial_averages_probs[["low_incorrect"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_incorrect"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -1.2553, df = 36, p-value = 0.2175
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.4922420 0.1229486
## sample estimates:
## cor
## -0.2047837
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = -2.052, df = 168, p-value = 0.04172
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.299826702 -0.005995973
## sample estimates:
## cor
## -0.1563691
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11,temp_plot_data$XDFR_MRI_ACC_L3)
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V11 and temp_plot_data$XDFR_MRI_ACC_L3
## t = 1.46, df = 36, p-value = 0.153
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.09005669 0.51703719
## sample estimates:
## cor
## 0.2364335
cor.test(individual_trial_averages_probs[["low_correct"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_correct"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 0.94546, df = 167, p-value = 0.3458
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.07886148 0.22148753
## sample estimates:
## cor
## 0.07296722
cor.test(individual_trial_averages_probs[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["high_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 1.487, df = 167, p-value = 0.1389
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.03729089 0.26077262
## sample estimates:
## cor
## 0.1143127
cor.test(individual_trial_averages_probs[["low_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: individual_trial_averages_probs[["low_load_correct_diff"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = -1.2811, df = 35, p-value = 0.2086
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.5012841 0.1206518
## sample estimates:
## cor
## -0.211641
# For each trial type, correlate classification probability at every TR
# with each behavioral measure, separately within the three working-memory
# capacity groups. Produces one 3 (group) x 14 (TR) frame per measure.
behav_classification_corr_list <- list()
for (trial_type in seq_len(6)){
  group_levels <- names(split_indiv_probs[["all_data"]][[trial_type]])[1:3]
  # Helper: an empty 3x14 correlation frame labeled by group and TR.
  make_corr_frame <- function() {
    out <- data.frame(matrix(nrow=3,ncol=14))
    rownames(out) <- group_levels
    colnames(out) <- seq_len(14)
    out
  }
  group_corrs_omnibus <- make_corr_frame()
  group_corrs_acc <- make_corr_frame()
  group_corrs_BPRS <- make_corr_frame()
  for (level in seq_len(3)){
    # Order subjects by PTID so rows align with the behavioral frame below.
    temp_subj <- split_indiv_probs[["all_data"]][[trial_type]][[level]][order(split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID),]
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_indiv_probs[["all_data"]][[trial_type]][[level]]$PTID,]
    for (TR in seq_len(14)){
      group_corrs_omnibus[level,TR] <- cor(temp_subj[,TR],temp_data$omnibus_span_no_DFR_MRI,use="pairwise.complete.obs")
      group_corrs_acc[level,TR] <- cor(temp_subj[,TR],temp_data$XDFR_MRI_ACC_L3,use="pairwise.complete.obs")
      group_corrs_BPRS[level,TR] <- cor(temp_subj[,TR],temp_data$BPRS_TOT.x,use="pairwise.complete.obs")
    }
  }
  # Attach the capacity-group label once, after the correlations are filled
  # (the original re-assigned these inside the level loop on every pass,
  # and reused group_corrs_acc's rownames for all three frames; each frame
  # now uses its own rownames -- identical values, clearer intent).
  group_corrs_acc$level <- factor(rownames(group_corrs_acc))
  group_corrs_BPRS$level <- factor(rownames(group_corrs_BPRS))
  group_corrs_omnibus$level <- factor(rownames(group_corrs_omnibus))
  trial_name <- names(split_indiv_probs[["all_data"]])[trial_type]
  behav_classification_corr_list[["omnibus"]][[trial_name]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[trial_name]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[trial_name]] <- group_corrs_acc
}
# Reshape each group-by-TR correlation frame to long format and draw one
# line per capacity group across TRs, for every measure x trial type.
behav_classification_corr_melt <- list()
behav_split_plot_list <- list()
for (measure in seq.int(1,3)){
  measure_name <- names(behav_classification_corr_list)[measure]
  for (trial_type in seq.int(1,6)){
    trial_name <- names(behav_classification_corr_list[[measure]])[trial_type]
    # Long format: one row per (group level, TR) correlation value.
    melted <- melt(behav_classification_corr_list[[measure]][[trial_type]],id.vars="level")
    # TR labels come back as a factor; recover numeric TR for the x axis.
    melted$variable <- as.numeric(as.character(melted$variable))
    # Fix the legend order: high, med, low capacity.
    melted$level <- factor(melted$level, levels=c("high","med","low"))
    behav_classification_corr_melt[[measure_name]][[trial_name]] <- melted
    behav_split_plot_list[[measure_name]][[trial_name]] <-
      ggplot(data = melted,aes(x=variable,y=value))+
      geom_line(aes(color=level))+
      scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
      ggtitle(trial_name)+
      xlab("TR")+
      ylab("Correlation")+
      theme_classic()
  }
}
# Omnibus span: 2x2 grid for the four accuracy-by-load trial types...
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) /
(behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# ...and the two correct-minus-incorrect difference panels.
(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) +
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# High-load accuracy: same layout.
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) /
(behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# BPRS total: same layout.
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) /
(behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
There are no significant relationships between individual differences and the classification probabilities from the templates averaged over time.
# Per-subject classification probability averaged over the whole trial
# (TRs 1-14) for each template-based condition.
template_avg_over_time <- data.frame(high_correct = rowMeans(averages_from_template[["high_correct"]][,1:14]),
                                     high_incorrect = rowMeans(averages_from_template[["high_incorrect"]][,1:14]),
                                     low_correct = rowMeans(averages_from_template[["low_correct"]][,1:14]),
                                     # na.rm here only: presumably some subjects have
                                     # missing low-load incorrect TRs -- TODO confirm
                                     low_incorrect = rowMeans(averages_from_template[["low_incorrect"]][,1:14],na.rm=TRUE),
                                     high_load_diff_correct = rowMeans(averages_from_template[["high_load_correct_diff"]][,1:14]),
                                     low_load_diff_correct = rowMeans(averages_from_template[["low_load_correct_diff"]][,1:14]))
# (Removed a no-op: `template_avg_over_time[is.na(template_avg_over_time)] <- NA`
# assigned NA to cells that were already NA and had no effect.)
# Attach omnibus span and subject IDs, then merge in BPRS and DFR accuracy.
template_avg_over_time <- data.frame(template_avg_over_time, omnibus_span = constructs_fMRI$omnibus_span_no_DFR_MRI, PTID = constructs_fMRI$PTID)
template_avg_over_time <- merge(template_avg_over_time, p200_data[,c("PTID","BPRS_TOT","XDFR_MRI_ACC_L3", "XDFR_MRI_ACC_L1")],by="PTID")
# Scatter of each condition's time-averaged face probability (y) against a
# behavioral measure (x), with a linear fit. One point per subject.
scatter_vs_behav <- function(plot_data, x_var, title) {
  ggplot(data = plot_data, aes(y = prob, x = .data[[x_var]])) +
    geom_point() +
    stat_smooth(method = "lm") +
    # Fixed: probability is mapped to the y axis, so label y (the original
    # called xlab("Probability"), mislabeling the behavioral axis).
    ylab("Probability") +
    xlab(x_var) +
    ggtitle(title) +
    theme_classic()
}
plot_list <- list()
for (i in seq.int(1, 6)) {
  # Columns 2-7 hold the six condition averages; 8:11 the behavioral measures.
  plot_data <- template_avg_over_time[, c(i + 1, 8:11)]
  colnames(plot_data)[1] <- "prob"
  cond_name <- colnames(template_avg_over_time)[i + 1]
  plot_list[["omnibus"]][[i]] <- scatter_vs_behav(plot_data, "omnibus_span", cond_name)
  plot_list[["DFR_Acc"]][[i]] <- scatter_vs_behav(plot_data, "XDFR_MRI_ACC_L3", cond_name)
  plot_list[["BPRS"]][[i]] <- scatter_vs_behav(plot_data, "BPRS_TOT", cond_name)
}
# Time-averaged face probability vs omnibus span: conditions 1-4 (2x2 grid).
(plot_list[["omnibus"]][[1]] + plot_list[["omnibus"]][[2]]) /
(plot_list[["omnibus"]][[3]] + plot_list[["omnibus"]][[4]]) +
plot_annotation(title="Correlation of face probability and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# The two correct-minus-incorrect difference conditions vs omnibus span.
(plot_list[["omnibus"]][[5]] + plot_list[["omnibus"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and Omnibus Span")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Same layout against high-load DFR accuracy.
(plot_list[["DFR_Acc"]][[1]] + plot_list[["DFR_Acc"]][[2]]) /
(plot_list[["DFR_Acc"]][[3]] + plot_list[["DFR_Acc"]][[4]]) +
plot_annotation(title="Correlation of face probability and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["DFR_Acc"]][[5]] + plot_list[["DFR_Acc"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and DFR High Load Accuracy")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Same layout against BPRS total symptom score.
(plot_list[["BPRS"]][[1]] + plot_list[["BPRS"]][[2]]) /
(plot_list[["BPRS"]][[3]] + plot_list[["BPRS"]][[4]]) +
plot_annotation(title="Correlation of face probability and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][[5]] + plot_list[["BPRS"]][[6]])+
plot_annotation(title="Correlation of difference in face probability across correctness and BPRS")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Combine construct scores, task data, and clinical z-scores per subject.
data_to_plot <- merge(constructs_fMRI,p200_data,by="PTID")
data_to_plot <- merge(data_to_plot,p200_clinical_zscores,by="PTID")
# Keep PTID plus four behavioral measures.
# NOTE(review): bare numeric column indices are fragile -- they silently
# break if the merged column order changes; confirm columns 7, 14, 15, 41,
# and 42 are the intended measures.
data_to_plot <- data_to_plot[,c(1,7,14,15,41,42)]
# Load effect: high-load minus low-load accuracy.
data_to_plot$ACC_LE <- data_to_plot$XDFR_MRI_ACC_L3 - data_to_plot$XDFR_MRI_ACC_L1
# For each condition template, correlate every behavioral measure (columns
# 2-5 of data_to_plot) with the face probability at each of the 14 TRs, then
# draw one correlation-by-time line per measure.
corr_to_behav_plots <- list()
for (cond_idx in seq.int(1, 6)) {
  cond_name <- names(averages_from_template)[cond_idx]
  # 14 x 4 matrix: rows = TRs, columns = behavioral measures.
  tr_by_measure <- vapply(seq.int(2, 5), function(m) {
    vapply(seq.int(1, 14), function(tr) {
      cor(data_to_plot[, m], averages_from_template[[cond_idx]][, tr],
          use = "pairwise.complete.obs")
    }, numeric(1))
  }, numeric(14))
  tr_by_measure <- data.frame(tr_by_measure)
  colnames(tr_by_measure) <- colnames(data_to_plot)[2:5]
  tr_by_measure$TR <- seq.int(1, 14)
  # Long format: one row per (TR, measure) pair for ggplot.
  long_corrs <- melt(tr_by_measure, id.vars = "TR")
  corr_to_behav_plots[[cond_name]] <-
    ggplot(data = long_corrs, aes(x = TR, y = value)) +
    geom_line(aes(color = variable)) +
    scale_x_continuous(breaks = c(1:14), labels = c(1:14)) +
    ggtitle(cond_name) +
    theme_classic()
}
# Conditions 1-4 in a 2x2 grid, legends collected into one.
(corr_to_behav_plots[[1]] + corr_to_behav_plots[[2]]) / (corr_to_behav_plots[[3]] + corr_to_behav_plots[[4]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
# The two difference conditions.
(corr_to_behav_plots[[5]] + corr_to_behav_plots[[6]])+
plot_layout(guides="collect")+
plot_annotation(title = "Correlation between face classification and behavioral measures")
# Scatter of face probability at a single TR against a behavioral measure,
# one plot per condition. Replaces nine near-identical inline ggplot blocks.
period_scatter <- function(dat, y_var, x_var, title) {
  ggplot(data = dat, aes(y = .data[[y_var]], x = .data[[x_var]])) +
    geom_point() +
    stat_smooth(method = "lm") +
    ylab("Probability") +
    xlab(x_var) +
    ggtitle(title) +
    theme_classic()
}
# TR columns standing in for each trial period.
# NOTE(review): V6/V8/V11 are assumed to index the cue / delay / probe TRs --
# confirm against the trial timing.
period_cols <- c(cue = "V6", delay = "V8", probe = "V11")
measure_cols <- c(omnibus = "omnibus_span_no_DFR_MRI",
                  L3_Acc = "XDFR_MRI_ACC_L3",
                  BPRS = "BPRS_TOT")
plot_list <- list()
for (trial_type in seq.int(1, 6)) {
  # temp_plot_data is deliberately left in scope after the loop: the
  # cor.test() follow-ups below reuse the final (trial_type == 6) merge.
  temp_plot_data <- merge(p200_data, averages_from_template[[trial_type]], by = "PTID")
  temp_plot_data <- merge(temp_plot_data, constructs_fMRI, by = "PTID")
  cond_name <- names(averages_from_template)[trial_type]
  for (m in names(measure_cols)) {
    for (p in names(period_cols)) {
      plot_list[[m]][[p]][[trial_type]] <-
        period_scatter(temp_plot_data, period_cols[[p]], measure_cols[[m]], cond_name)
    }
  }
}
# Cue-period (V6) classification vs each behavioral measure.
# Conditions 1-4 (2x2 grid), then the two difference conditions.
(plot_list[["omnibus"]][["cue"]][[1]] + plot_list[["omnibus"]][["cue"]][[2]]) /
(plot_list[["omnibus"]][["cue"]][[3]] + plot_list[["omnibus"]][["cue"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["cue"]][[5]] + plot_list[["omnibus"]][["cue"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[1]] + plot_list[["L3_Acc"]][["cue"]][[2]]) /
(plot_list[["L3_Acc"]][["cue"]][[3]] + plot_list[["L3_Acc"]][["cue"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["cue"]][[5]] + plot_list[["L3_Acc"]][["cue"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[1]] + plot_list[["BPRS"]][["cue"]][[2]]) /
(plot_list[["BPRS"]][["cue"]][[3]] + plot_list[["BPRS"]][["cue"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["cue"]][[5]] + plot_list[["BPRS"]][["cue"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at Cue Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
There are no significant relationships.
# Delay-period (V8) classification vs each behavioral measure.
# Conditions 1-4 (2x2 grid), then the two difference conditions.
(plot_list[["omnibus"]][["delay"]][[1]] + plot_list[["omnibus"]][["delay"]][[2]]) /
(plot_list[["omnibus"]][["delay"]][[3]] + plot_list[["omnibus"]][["delay"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["delay"]][[5]] + plot_list[["omnibus"]][["delay"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[1]] + plot_list[["L3_Acc"]][["delay"]][[2]]) /
(plot_list[["L3_Acc"]][["delay"]][[3]] + plot_list[["L3_Acc"]][["delay"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["delay"]][[5]] + plot_list[["L3_Acc"]][["delay"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[1]] + plot_list[["BPRS"]][["delay"]][[2]]) /
(plot_list[["BPRS"]][["delay"]][[3]] + plot_list[["BPRS"]][["delay"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["delay"]][[5]] + plot_list[["BPRS"]][["delay"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at delay Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
There is a negative relationship between classification probability at low load correct trials and omnibus span.
# Probe-period (V11) classification vs each behavioral measure.
# Conditions 1-4 (2x2 grid), then the two difference conditions.
(plot_list[["omnibus"]][["probe"]][[1]] + plot_list[["omnibus"]][["probe"]][[2]]) /
(plot_list[["omnibus"]][["probe"]][[3]] + plot_list[["omnibus"]][["probe"]][[4]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["omnibus"]][["probe"]][[5]] + plot_list[["omnibus"]][["probe"]][[6]]) +
plot_annotation(title = "Omnibus vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[1]] + plot_list[["L3_Acc"]][["probe"]][[2]]) /
(plot_list[["L3_Acc"]][["probe"]][[3]] + plot_list[["L3_Acc"]][["probe"]][[4]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["L3_Acc"]][["probe"]][[5]] + plot_list[["L3_Acc"]][["probe"]][[6]]) +
plot_annotation(title = "L3_Acc vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[1]] + plot_list[["BPRS"]][["probe"]][[2]]) /
(plot_list[["BPRS"]][["probe"]][[3]] + plot_list[["BPRS"]][["probe"]][[4]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
(plot_list[["BPRS"]][["probe"]][[5]] + plot_list[["BPRS"]][["probe"]][[6]]) +
plot_annotation(title = "BPRS vs Classification at probe Period")
## `geom_smooth()` using formula 'y ~ x'
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 132 rows containing non-finite values (stat_smooth).
## Warning: Removed 132 rows containing missing values (geom_point).
# Follow-up test of the probe-period (V11) effect: low-load-correct face
# probability vs omnibus span.
# NOTE(review): temp_plot_data here is whatever remained after the last loop
# iteration above -- rows are assumed to align with averages_from_template;
# confirm both are ordered by PTID.
cor.test(averages_from_template[["low_correct"]]$V11,temp_plot_data$omnibus_span_no_DFR_MRI)
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["low_correct"]]$V11 and temp_plot_data$omnibus_span_no_DFR_MRI
## t = -2.0511, df = 168, p-value = 0.04181
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.299766378 -0.005929694
## sample estimates:
## cor
## -0.1563044
# Same probe TR for high-load-correct vs BPRS, restricted to BPRS_TOT < 70.
# NOTE(review): the 70 cutoff looks like a severity/outlier screen -- confirm
# the intended threshold.
cor.test(averages_from_template[["high_correct"]]$V11[temp_plot_data$BPRS_TOT < 70],temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70])
##
## Pearson's product-moment correlation
##
## data: averages_from_template[["high_correct"]]$V11[temp_plot_data$BPRS_TOT < 70] and temp_plot_data$BPRS_TOT[temp_plot_data$BPRS_TOT < 70]
## t = 1.1909, df = 167, p-value = 0.2354
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## -0.06002263 0.23941226
## sample estimates:
## cor
## 0.09176892
# For each condition, correlate face-classification probability with each
# behavioral measure at every TR, computed separately within the three
# working-memory-capacity groups (rows = group, columns = TR).
behav_classification_corr_list <- list()
for (trial_type in seq.int(1, 6)) {
  group_names <- names(split_template[["all_data"]][[trial_type]])[1:3]
  # Preallocate one 3 x 14 (group x TR) frame per behavioral measure.
  blank <- data.frame(matrix(nrow = 3, ncol = 14))
  rownames(blank) <- group_names
  colnames(blank) <- seq.int(1, 14)
  group_corrs_omnibus <- blank
  group_corrs_acc <- blank
  group_corrs_BPRS <- blank
  for (level in seq.int(1, 3)) {
    # Sort subjects by PTID so rows line up with the behavioral frame, which
    # is filtered (and therefore ordered) by PTID membership.
    temp_subj <- split_template[["all_data"]][[trial_type]][[level]][order(split_template[["all_data"]][[trial_type]][[level]]$PTID), ]
    temp_data <- data_to_plot[data_to_plot$PTID %in% split_template[["all_data"]][[trial_type]][[level]]$PTID, ]
    for (TR in seq.int(1, 14)) {
      group_corrs_omnibus[level, TR] <- cor(temp_subj[, TR], temp_data$omnibus_span_no_DFR_MRI, use = "pairwise.complete.obs")
      group_corrs_acc[level, TR] <- cor(temp_subj[, TR], temp_data$XDFR_MRI_ACC_L3, use = "pairwise.complete.obs")
      group_corrs_BPRS[level, TR] <- cor(temp_subj[, TR], temp_data$BPRS_TOT.x, use = "pairwise.complete.obs")
    }
  }
  # Fixed: the level-label columns were redundantly (re)assigned on every
  # pass of the level loop, and two frames borrowed group_corrs_acc's
  # rownames. Assigning once after the loop, each from its own rownames,
  # yields the identical final frames.
  group_corrs_acc$level <- factor(rownames(group_corrs_acc))
  group_corrs_BPRS$level <- factor(rownames(group_corrs_BPRS))
  group_corrs_omnibus$level <- factor(rownames(group_corrs_omnibus))
  behav_classification_corr_list[["omnibus"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_omnibus
  behav_classification_corr_list[["BPRS"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_BPRS
  behav_classification_corr_list[["L3_Acc"]][[names(split_template[["all_data"]])[trial_type]]] <- group_corrs_acc
}
# Melt each group-by-TR correlation frame to long format and draw one
# correlation-by-TR line per capacity group, for every measure x condition.
behav_classification_corr_melt <- list()
behav_split_plot_list <- list()
for (measure in seq.int(1,3)){
for (trial_type in seq.int(1,6)){
# Long format: one row per (level, TR); melt() puts the TR label in 'variable'.
behav_classification_corr_melt[[names(behav_classification_corr_list)[measure]]][[names(behav_classification_corr_list[[measure]])[trial_type]]] <- melt(behav_classification_corr_list[[measure]][[trial_type]],id.vars="level")
# The TR labels come back as a factor; convert to numeric for a continuous axis.
behav_classification_corr_melt[[measure]][[trial_type]]$variable <- as.numeric(as.character(behav_classification_corr_melt[[measure]][[trial_type]]$variable))
# Fix legend order: high, med, low capacity groups.
behav_classification_corr_melt[[measure]][[trial_type]]$level <- factor(behav_classification_corr_melt[[measure]][[trial_type]]$level, levels=c("high","med","low"))
behav_split_plot_list[[names(behav_classification_corr_melt)[measure]]][[names(behav_classification_corr_melt[[measure]])[trial_type]]] <-
ggplot(data = behav_classification_corr_melt[[measure]][[trial_type]],aes(x=variable,y=value))+
geom_line(aes(color=level))+
scale_x_continuous(breaks = c(1:14),labels=c(1:14))+
ggtitle(names(behav_classification_corr_list[[measure]])[trial_type])+
xlab("TR")+
ylab("Correlation")+
theme_classic()
}
}
# Omnibus span: conditions 1-4 in a 2x2 grid, legends collected.
(behav_split_plot_list[["omnibus"]][[1]] + behav_split_plot_list[["omnibus"]][[2]]) /
(behav_split_plot_list[["omnibus"]][[3]] + behav_split_plot_list[["omnibus"]][[4]])+
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# Omnibus span: the two difference conditions.
(behav_split_plot_list[["omnibus"]][[5]] + behav_split_plot_list[["omnibus"]][[6]]) +
plot_annotation("Omnibus Span Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# High-load accuracy: conditions 1-4 in a 2x2 grid.
(behav_split_plot_list[["L3_Acc"]][[1]] + behav_split_plot_list[["L3_Acc"]][[2]]) /
(behav_split_plot_list[["L3_Acc"]][[3]] + behav_split_plot_list[["L3_Acc"]][[4]])+
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# High-load accuracy: the two difference conditions.
(behav_split_plot_list[["L3_Acc"]][[5]] + behav_split_plot_list[["L3_Acc"]][[6]]) +
plot_annotation("High Load Acc Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# BPRS total: conditions 1-4 in a 2x2 grid, legends collected.
(behav_split_plot_list[["BPRS"]][[1]] + behav_split_plot_list[["BPRS"]][[2]]) /
(behav_split_plot_list[["BPRS"]][[3]] + behav_split_plot_list[["BPRS"]][[4]])+
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")
# BPRS total: the two difference conditions.
# Fixed: title was missing the word "Correlation", inconsistent with the
# identical panels above.
(behav_split_plot_list[["BPRS"]][[5]] + behav_split_plot_list[["BPRS"]][[6]]) +
plot_annotation("BPRS Total Correlation with Face Classification Probability by Group")+
plot_layout(guides="collect")